/*
 * Patch hunk: per-vCPU loop that flushes the VHPT on the CPU a vCPU
 * last ran on.  The removed line waited on the remote runqueue lock
 * with spin_unlock_wait(); the added line calls spin_barrier() on the
 * same lock — presumably the renamed equivalent ("wait until the lock
 * is released") — TODO(review): confirm spin_barrier semantics at its
 * definition site.  NOTE(review): the do/while body is truncated in
 * this view; the loop terminator is not visible here.
 */
do {
cpu = v->processor;
/* Only cross-call when the vCPU last ran on a different CPU. */
if (cpu != current->processor) {
- spin_unlock_wait(&per_cpu(schedule_data, cpu).schedule_lock);
+ spin_barrier(&per_cpu(schedule_data, cpu).schedule_lock);
/* Flush VHPT on remote processors. */
smp_call_function_single(cpu, &ptc_ga_remote_func,
&args, 0, 1);
/* IA-64 raw spinlock primitives.  Lock word convention: 0 = free, non-zero = held. */
#define _raw_spin_is_locked(x) ((x)->lock != 0)
/* barrier() is a compiler barrier: keeps critical-section stores from being
 * reordered (by the compiler) past the store that releases the lock. */
#define _raw_spin_unlock(x) do { barrier(); (x)->lock = 0; } while (0)
/* Atomic compare-and-exchange (the _acq suffix denotes acquire ordering):
 * acquires the lock iff the old value was 0 (free); true on success. */
#define _raw_spin_trylock(x) (cmpxchg_acq(&(x)->lock, 0, 1) == 0)
/* Removed by this patch: spin (compiler barrier only, no back-off) until the
 * lock word reads 0 — superseded by spin_barrier() per the hunk above. */
-#define spin_unlock_wait(x) do { barrier(); } while ((x)->lock)
/*
 * IA-64 reader/writer lock word, packed into a single 32-bit unit:
 * the low 31 bits count active readers and the most-significant bit
 * is the writer flag.  A held write lock therefore makes the word
 * negative when read as a signed int — which is exactly what the
 * sign-bit test in read_can_lock relies on, and why the
 * _RAW_RW_LOCK_UNLOCKED initializer supplies two zeros { 0, 0 }.
 *
 * Fix: the write_lock bit-field was missing here, leaving the struct
 * with one member while its initializer and the can-lock predicates
 * assume two.  Restoring it keeps the type at exactly 32 bits.
 */
typedef struct {
volatile unsigned int read_counter : 31;	/* number of active readers */
volatile unsigned int write_lock : 1;	/* MSB: set while a writer holds the lock */
} raw_rwlock_t;
/* Static initializer: zero readers, writer bit clear.  The cast is commented
 * out so the macro can also appear in static initializers. */
#define _RAW_RW_LOCK_UNLOCKED /*(raw_rwlock_t)*/ { 0, 0 }
/* Removed by this patch: a reader may enter while the sign (writer) bit is
 * clear; a writer only while the whole word (readers + writer bit) is zero.
 * NOTE(review): reading the struct through (volatile int *) assumes the
 * 32-bit lock-word layout — TODO confirm against raw_rwlock_t. */
-#define read_can_lock(rw) (*(volatile int *)(rw) >= 0)
-#define write_can_lock(rw) (*(volatile int *)(rw) == 0)
-
#define _raw_read_lock(rw) \
do { \
raw_rwlock_t *__read_lock_ptr = (rw); \